from tensorflow.keras.optimizers import Adam
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.layers import UpSampling2D
from keras import callbacks, optimizers
from keras.models import Model, load_model, model_from_json
from keras.preprocessing import image
from keras.layers import Input, Conv2D, MaxPooling2D, Activation, BatchNormalization, Conv2DTranspose, Concatenate
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
from keras import backend as K
import numpy as np
import pandas as pd
import cv2
import os
import matplotlib.pyplot as plt
%matplotlib inline
from PIL import Image
#from keras import utils
import re
import os
from keras.models import Model, load_model
import tensorflow as tf
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras import optimizers
from keras.layers import BatchNormalization
from tensorflow.keras.metrics import MeanIoU
import keras
# from keras.applications.resnet50 import ResNet50
# from keras import utils
# from keras.utils import plot_model
from skimage import color
import pickle
import matplotlib.pyplot as plt
import albumentations as album
"""
db = dbf.Dbf("massachusetts-buildings_corrected.dbf", new = True)
"""
# Target (width, height) for cv2.resize; must match `image_shape` used by Unet().
dim = (256, 256)

# Use a raw string: "\." in a normal string literal is an invalid escape
# (SyntaxWarning/DeprecationWarning on modern Python).
_IMG_EXT_RE = re.compile(r"\.(jpg|jpeg|png|bmp|tiff)$")


def load_split(directory, target=dim):
    """Recursively read every image under *directory* and resize it.

    Replaces six copy-pasted walk/read/resize loops from the original cell.

    Args:
        directory: root folder to walk (e.g. "png/train").
        target: (width, height) tuple passed to cv2.resize.

    Returns:
        np.ndarray of shape (n_images, target[1], target[0], 3), BGR
        channel order (cv2.imread convention), dtype uint8.
    """
    samples = []
    for root, _dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if _IMG_EXT_RE.search(filename):
                filepath = os.path.join(root, filename)
                # Named `img`, not `image`: the original shadowed the
                # `keras.preprocessing.image` module imported above.
                img = cv2.imread(filepath)
                if img is None:
                    # Unreadable/corrupt file: skip it instead of letting
                    # cv2.resize crash on None.
                    continue
                samples.append(cv2.resize(img, target, interpolation=cv2.INTER_AREA))
    return np.array(samples)


X_train = load_split("png/train")
print(X_train.shape)
Y_train = load_split("png/train_labels")
print(Y_train.shape)
X_test = load_split("png/test")
print(X_test.shape)
Y_test = load_split("png/test_labels")
print(Y_test.shape)
X_val = load_split("png/val")
print(X_val.shape)
Y_val = load_split("png/val_labels")
print(Y_val.shape)
# cell output: (137, 256, 256, 3) (137, 256, 256, 3) (10, 256, 256, 3) (10, 256, 256, 3) (4, 256, 256, 3) (4, 256, 256, 3)
def convert_to_gray(image):
    """Convert a batch of BGR images to single-channel greyscale.

    Prints the input and output shapes (debug aid kept from the
    original notebook cell).

    Args:
        image: array of shape (n, H, W, 3), BGR order (cv2 convention).

    Returns:
        np.ndarray of shape (n, H, W, 1).
    """
    print(image.shape)
    grey_stack = np.asarray(
        [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) for frame in image]
    )
    # Re-attach a trailing channel axis so the result stays 4-D.
    with_channel = np.expand_dims(grey_stack, -1)
    print(with_channel.shape)
    return with_channel
# Converting the images in grey scale.
# Labels become (n, 256, 256, 1) masks matching the model's 1-channel output.
# NOTE(review): converting X_test (an *input* set) to grey is odd for a
# 3-channel-input model, and X_test_grey is immediately overwritten two
# lines below — confirm intent.
Y_train_grey=convert_to_gray(Y_train)
Y_test_grey=convert_to_gray(Y_test)
X_test_grey=convert_to_gray(X_test)
# cell output: (137, 256, 256, 3) (137, 256, 256, 1) (10, 256, 256, 3) (10, 256, 256, 1) (10, 256, 256, 3) (10, 256, 256, 1)
X_test_grey=convert_to_gray(X_train)
# cell output: (137, 256, 256, 3) (137, 256, 256, 1)
image_shape = (256,256)
from tensorflow.keras import backend as K
def dice_coef1(y_true, y_pred):
    """Soft Dice coefficient between two masks.

    Both tensors are flattened, so this is a single global (per-batch)
    Dice score.  The 0.0001 smoothing term avoids division by zero
    when both masks are empty.
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + 0.0001) / (K.sum(y_true_f) + K.sum(y_pred_f) + 0.0001)


def dice_coef_loss1(y_true, y_pred):
    """Dice loss = 1 - Dice coefficient (minimised during training).

    BUG FIX: the original returned ``1 - dice_coef(...)`` but no
    ``dice_coef`` is defined in this file — the metric above is named
    ``dice_coef1`` — so compiling with this loss raised a NameError.
    """
    return 1 - dice_coef1(y_true, y_pred)
def Unet(num_classes = 1, input_shape= (image_shape[0],image_shape[1], 3)):
    """VGG-style encoder/decoder U-Net for image segmentation.

    Args:
        num_classes: number of output channels.  Generalised from the
            original hard-coded ``1``; the default preserves the
            original single-channel sigmoid output.
        input_shape: (H, W, C) of the input images.

    Returns:
        A compiled keras ``Model`` (Adam, Dice loss, Dice metric).
    """

    def conv_bn_relu(x, filters):
        # Conv(3x3, same) -> BatchNorm -> ReLU, the basic unit of every block.
        x = Conv2D(filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        return Activation('relu')(x)

    def up_bn_relu(x, filters):
        # Transposed-conv 2x upsampling -> BatchNorm -> ReLU.
        x = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(x)
        x = BatchNormalization()(x)
        return Activation('relu')(x)

    inp = Input(input_shape)

    # ---- Encoder ----
    x = conv_bn_relu(inp, 64)
    skip1 = conv_bn_relu(x, 64)          # 256x256, concat target
    x = MaxPooling2D()(skip1)
    x = conv_bn_relu(x, 128)
    skip2 = conv_bn_relu(x, 128)         # 128x128, concat target
    x = MaxPooling2D()(skip2)
    x = conv_bn_relu(x, 256)
    x = conv_bn_relu(x, 256)
    skip3 = conv_bn_relu(x, 256)         # 64x64, concat target
    x = MaxPooling2D()(skip3)

    # ---- Bottleneck ----
    x = conv_bn_relu(x, 512)
    x = conv_bn_relu(x, 512)
    x = conv_bn_relu(x, 512)

    # ---- Decoder ----
    # BUG FIX: the original assigned three decoder ReLU outputs to a
    # throwaway `output_block_four` variable and then wired the next
    # layer to the preceding BatchNorm output instead, so those
    # Activation layers were created but never part of the graph
    # (visible in the printed summary: concatenate/transpose layers
    # connect to batch_normalization_203/205/209, and
    # activation_203/205/209 are absent).  The helper functions above
    # wire every Conv/BN/ReLU triple correctly.
    x = up_bn_relu(x, 256)
    x = Concatenate()([x, skip3])
    x = conv_bn_relu(x, 256)
    x = conv_bn_relu(x, 256)
    x = up_bn_relu(x, 128)
    x = Concatenate()([x, skip2])
    x = conv_bn_relu(x, 128)
    x = conv_bn_relu(x, 128)
    x = up_bn_relu(x, 64)
    x = Concatenate()([x, skip1])
    x = conv_bn_relu(x, 64)
    x = conv_bn_relu(x, 64)

    # Per-pixel sigmoid output; `num_classes` channels (default 1).
    out = Conv2D(num_classes, (3, 3), activation='sigmoid', padding='same')(x)

    model = Model(inputs=inp, outputs=out)
    # Removed the unused SGD optimizer the original built and discarded.
    # `lr` is deprecated (the original run log shows the UserWarning);
    # `learning_rate` is the supported argument.
    adam = tf.keras.optimizers.Adam(learning_rate=0.0001)
    model.compile(optimizer=adam, loss=dice_coef_loss1, metrics=[dice_coef1])
    return model
# Build the U-Net with default settings (1 output channel, 256x256x3
# input) and print the layer-by-layer summary.
model = Unet()
model.summary()
Model: "model_4"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_13 (InputLayer) [(None, 256, 256, 3 0 []
)]
conv2d_131 (Conv2D) (None, 256, 256, 64 1792 ['input_13[0][0]']
)
batch_normalization_193 (Batch (None, 256, 256, 64 256 ['conv2d_131[0][0]']
Normalization) )
activation_193 (Activation) (None, 256, 256, 64 0 ['batch_normalization_193[0][0]']
)
conv2d_132 (Conv2D) (None, 256, 256, 64 36928 ['activation_193[0][0]']
)
batch_normalization_194 (Batch (None, 256, 256, 64 256 ['conv2d_132[0][0]']
Normalization) )
activation_194 (Activation) (None, 256, 256, 64 0 ['batch_normalization_194[0][0]']
)
max_pooling2d_31 (MaxPooling2D (None, 128, 128, 64 0 ['activation_194[0][0]']
) )
conv2d_133 (Conv2D) (None, 128, 128, 12 73856 ['max_pooling2d_31[0][0]']
8)
batch_normalization_195 (Batch (None, 128, 128, 12 512 ['conv2d_133[0][0]']
Normalization) 8)
activation_195 (Activation) (None, 128, 128, 12 0 ['batch_normalization_195[0][0]']
8)
conv2d_134 (Conv2D) (None, 128, 128, 12 147584 ['activation_195[0][0]']
8)
batch_normalization_196 (Batch (None, 128, 128, 12 512 ['conv2d_134[0][0]']
Normalization) 8)
activation_196 (Activation) (None, 128, 128, 12 0 ['batch_normalization_196[0][0]']
8)
max_pooling2d_32 (MaxPooling2D (None, 64, 64, 128) 0 ['activation_196[0][0]']
)
conv2d_135 (Conv2D) (None, 64, 64, 256) 295168 ['max_pooling2d_32[0][0]']
batch_normalization_197 (Batch (None, 64, 64, 256) 1024 ['conv2d_135[0][0]']
Normalization)
activation_197 (Activation) (None, 64, 64, 256) 0 ['batch_normalization_197[0][0]']
conv2d_136 (Conv2D) (None, 64, 64, 256) 590080 ['activation_197[0][0]']
batch_normalization_198 (Batch (None, 64, 64, 256) 1024 ['conv2d_136[0][0]']
Normalization)
activation_198 (Activation) (None, 64, 64, 256) 0 ['batch_normalization_198[0][0]']
conv2d_137 (Conv2D) (None, 64, 64, 256) 590080 ['activation_198[0][0]']
batch_normalization_199 (Batch (None, 64, 64, 256) 1024 ['conv2d_137[0][0]']
Normalization)
activation_199 (Activation) (None, 64, 64, 256) 0 ['batch_normalization_199[0][0]']
max_pooling2d_33 (MaxPooling2D (None, 32, 32, 256) 0 ['activation_199[0][0]']
)
conv2d_138 (Conv2D) (None, 32, 32, 512) 1180160 ['max_pooling2d_33[0][0]']
batch_normalization_200 (Batch (None, 32, 32, 512) 2048 ['conv2d_138[0][0]']
Normalization)
activation_200 (Activation) (None, 32, 32, 512) 0 ['batch_normalization_200[0][0]']
conv2d_139 (Conv2D) (None, 32, 32, 512) 2359808 ['activation_200[0][0]']
batch_normalization_201 (Batch (None, 32, 32, 512) 2048 ['conv2d_139[0][0]']
Normalization)
activation_201 (Activation) (None, 32, 32, 512) 0 ['batch_normalization_201[0][0]']
conv2d_140 (Conv2D) (None, 32, 32, 512) 2359808 ['activation_201[0][0]']
batch_normalization_202 (Batch (None, 32, 32, 512) 2048 ['conv2d_140[0][0]']
Normalization)
activation_202 (Activation) (None, 32, 32, 512) 0 ['batch_normalization_202[0][0]']
conv2d_transpose_17 (Conv2DTra (None, 64, 64, 256) 524544 ['activation_202[0][0]']
nspose)
batch_normalization_203 (Batch (None, 64, 64, 256) 1024 ['conv2d_transpose_17[0][0]']
Normalization)
concatenate_29 (Concatenate) (None, 64, 64, 512) 0 ['batch_normalization_203[0][0]',
'activation_199[0][0]']
conv2d_141 (Conv2D) (None, 64, 64, 256) 1179904 ['concatenate_29[0][0]']
batch_normalization_204 (Batch (None, 64, 64, 256) 1024 ['conv2d_141[0][0]']
Normalization)
activation_204 (Activation) (None, 64, 64, 256) 0 ['batch_normalization_204[0][0]']
conv2d_142 (Conv2D) (None, 64, 64, 256) 590080 ['activation_204[0][0]']
batch_normalization_205 (Batch (None, 64, 64, 256) 1024 ['conv2d_142[0][0]']
Normalization)
conv2d_transpose_18 (Conv2DTra (None, 128, 128, 12 131200 ['batch_normalization_205[0][0]']
nspose) 8)
batch_normalization_206 (Batch (None, 128, 128, 12 512 ['conv2d_transpose_18[0][0]']
Normalization) 8)
activation_206 (Activation) (None, 128, 128, 12 0 ['batch_normalization_206[0][0]']
8)
concatenate_30 (Concatenate) (None, 128, 128, 25 0 ['activation_206[0][0]',
6) 'activation_196[0][0]']
conv2d_143 (Conv2D) (None, 128, 128, 12 295040 ['concatenate_30[0][0]']
8)
batch_normalization_207 (Batch (None, 128, 128, 12 512 ['conv2d_143[0][0]']
Normalization) 8)
activation_207 (Activation) (None, 128, 128, 12 0 ['batch_normalization_207[0][0]']
8)
conv2d_144 (Conv2D) (None, 128, 128, 12 147584 ['activation_207[0][0]']
8)
batch_normalization_208 (Batch (None, 128, 128, 12 512 ['conv2d_144[0][0]']
Normalization) 8)
activation_208 (Activation) (None, 128, 128, 12 0 ['batch_normalization_208[0][0]']
8)
conv2d_transpose_19 (Conv2DTra (None, 256, 256, 64 32832 ['activation_208[0][0]']
nspose) )
batch_normalization_209 (Batch (None, 256, 256, 64 256 ['conv2d_transpose_19[0][0]']
Normalization) )
concatenate_31 (Concatenate) (None, 256, 256, 12 0 ['batch_normalization_209[0][0]',
8) 'activation_194[0][0]']
conv2d_145 (Conv2D) (None, 256, 256, 64 73792 ['concatenate_31[0][0]']
)
batch_normalization_210 (Batch (None, 256, 256, 64 256 ['conv2d_145[0][0]']
Normalization) )
activation_210 (Activation) (None, 256, 256, 64 0 ['batch_normalization_210[0][0]']
)
conv2d_146 (Conv2D) (None, 256, 256, 64 36928 ['activation_210[0][0]']
)
batch_normalization_211 (Batch (None, 256, 256, 64 256 ['conv2d_146[0][0]']
Normalization) )
activation_211 (Activation) (None, 256, 256, 64 0 ['batch_normalization_211[0][0]']
)
conv2d_147 (Conv2D) (None, 256, 256, 1) 577 ['activation_211[0][0]']
==================================================================================================
Total params: 10,663,873
Trainable params: 10,655,809
Non-trainable params: 8,064
__________________________________________________________________________________________________
C:\Users\smith\anaconda3\lib\site-packages\keras\optimizer_v2\adam.py:105: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead. super(Adam, self).__init__(name, **kwargs)
# Batch size actually used for training.  The original defined
# batch_size = 2 but then hard-coded batch_size=4 in model.fit; the
# run log shows 35 steps/epoch = ceil(137 / 4), i.e. 4 was in effect,
# so the variable is set to 4 and passed through.
batch_size = 4
# Labels are scaled from uint8 [0, 255] to [0, 1] to match the sigmoid
# output; float16 keeps the target array small in memory.
history = model.fit(X_train, Y_train_grey.astype(np.float16)/255, epochs=50,
                    verbose=1, workers=1, batch_size=batch_size)
Epoch 1/50 35/35 [==============================] - 162s 5s/step - loss: 0.6997 - dice_coef1: 0.2983 Epoch 2/50 35/35 [==============================] - 157s 4s/step - loss: 0.6223 - dice_coef1: 0.3786 Epoch 3/50 35/35 [==============================] - 157s 4s/step - loss: 0.5662 - dice_coef1: 0.4334 Epoch 4/50 35/35 [==============================] - 156s 4s/step - loss: 0.5308 - dice_coef1: 0.4700 Epoch 5/50 35/35 [==============================] - 156s 4s/step - loss: 0.5057 - dice_coef1: 0.4881 Epoch 6/50 35/35 [==============================] - 157s 4s/step - loss: 0.4833 - dice_coef1: 0.5166 Epoch 7/50 35/35 [==============================] - 156s 4s/step - loss: 0.4663 - dice_coef1: 0.5336 Epoch 8/50 35/35 [==============================] - 156s 4s/step - loss: 0.4512 - dice_coef1: 0.5443 Epoch 9/50 35/35 [==============================] - 157s 4s/step - loss: 0.4416 - dice_coef1: 0.5587 Epoch 10/50 35/35 [==============================] - 156s 4s/step - loss: 0.4262 - dice_coef1: 0.5744 Epoch 11/50 35/35 [==============================] - 156s 4s/step - loss: 0.4222 - dice_coef1: 0.5719 Epoch 12/50 35/35 [==============================] - 157s 4s/step - loss: 0.4206 - dice_coef1: 0.5809 Epoch 13/50 35/35 [==============================] - 156s 4s/step - loss: 0.4016 - dice_coef1: 0.5927 Epoch 14/50 35/35 [==============================] - 156s 4s/step - loss: 0.3931 - dice_coef1: 0.6078 Epoch 15/50 35/35 [==============================] - 157s 4s/step - loss: 0.3840 - dice_coef1: 0.6170 Epoch 16/50 35/35 [==============================] - 156s 4s/step - loss: 0.3729 - dice_coef1: 0.6296 Epoch 17/50 35/35 [==============================] - 157s 4s/step - loss: 0.3666 - dice_coef1: 0.6328 Epoch 18/50 35/35 [==============================] - 157s 4s/step - loss: 0.3541 - dice_coef1: 0.6463 Epoch 19/50 35/35 [==============================] - 157s 4s/step - loss: 0.3549 - dice_coef1: 0.6450 Epoch 20/50 35/35 [==============================] - 157s 4s/step - 
loss: 0.3477 - dice_coef1: 0.6528 Epoch 21/50 35/35 [==============================] - 161s 5s/step - loss: 0.3428 - dice_coef1: 0.6565 Epoch 22/50 35/35 [==============================] - 157s 4s/step - loss: 0.3318 - dice_coef1: 0.6636 Epoch 23/50 35/35 [==============================] - 158s 5s/step - loss: 0.3317 - dice_coef1: 0.6659 Epoch 24/50 35/35 [==============================] - 157s 4s/step - loss: 0.3282 - dice_coef1: 0.6696 Epoch 25/50 35/35 [==============================] - 156s 4s/step - loss: 0.3328 - dice_coef1: 0.6634 Epoch 26/50 35/35 [==============================] - 157s 4s/step - loss: 0.3159 - dice_coef1: 0.6818 Epoch 27/50 35/35 [==============================] - 157s 4s/step - loss: 0.3141 - dice_coef1: 0.6876 Epoch 28/50 35/35 [==============================] - 157s 4s/step - loss: 0.3099 - dice_coef1: 0.6901 Epoch 29/50 35/35 [==============================] - 158s 5s/step - loss: 0.2965 - dice_coef1: 0.7034 Epoch 30/50 35/35 [==============================] - 158s 5s/step - loss: 0.3013 - dice_coef1: 0.6985 Epoch 31/50 35/35 [==============================] - 158s 5s/step - loss: 0.2976 - dice_coef1: 0.6949 Epoch 32/50 35/35 [==============================] - 157s 4s/step - loss: 0.3006 - dice_coef1: 0.6997 Epoch 33/50 35/35 [==============================] - 156s 4s/step - loss: 0.2934 - dice_coef1: 0.7067 Epoch 34/50 35/35 [==============================] - 157s 4s/step - loss: 0.2872 - dice_coef1: 0.7155 Epoch 35/50 35/35 [==============================] - 158s 5s/step - loss: 0.2847 - dice_coef1: 0.7154 Epoch 36/50 35/35 [==============================] - 156s 4s/step - loss: 0.2887 - dice_coef1: 0.7095 Epoch 37/50 35/35 [==============================] - 156s 4s/step - loss: 0.2838 - dice_coef1: 0.7155 Epoch 38/50 35/35 [==============================] - 156s 4s/step - loss: 0.2759 - dice_coef1: 0.7230 Epoch 39/50 35/35 [==============================] - 157s 4s/step - loss: 0.2749 - dice_coef1: 0.7200 Epoch 40/50 35/35 
[==============================] - 156s 4s/step - loss: 0.2911 - dice_coef1: 0.7085 Epoch 41/50 35/35 [==============================] - 157s 4s/step - loss: 0.2768 - dice_coef1: 0.7219 Epoch 42/50 35/35 [==============================] - 156s 4s/step - loss: 0.2702 - dice_coef1: 0.7300 Epoch 43/50 35/35 [==============================] - 156s 4s/step - loss: 0.2708 - dice_coef1: 0.7293 Epoch 44/50 35/35 [==============================] - 156s 4s/step - loss: 0.2657 - dice_coef1: 0.7278 Epoch 45/50 35/35 [==============================] - 157s 4s/step - loss: 0.2759 - dice_coef1: 0.7238 Epoch 46/50 35/35 [==============================] - 157s 4s/step - loss: 0.2688 - dice_coef1: 0.7321 Epoch 47/50 35/35 [==============================] - 160s 5s/step - loss: 0.2610 - dice_coef1: 0.7404 Epoch 48/50 35/35 [==============================] - 157s 4s/step - loss: 0.2612 - dice_coef1: 0.7279 Epoch 49/50 35/35 [==============================] - 156s 4s/step - loss: 0.2608 - dice_coef1: 0.7415 Epoch 50/50 35/35 [==============================] - 157s 4s/step - loss: 0.2581 - dice_coef1: 0.7409
predict = model.predict(X_test, batch_size = 5)
def displayimages(**images):
    """Show the given keyword-named images side by side in one figure.

    Each keyword becomes its subplot title (underscores turned into
    spaces, title-cased); axis ticks are suppressed.
    """
    count = len(images)
    plt.figure(figsize=(16, 8))
    for position, (label, img) in enumerate(images.items(), start=1):
        plt.subplot(1, count, position)
        plt.xticks([])
        plt.yticks([])
        plt.title(label.replace('_', ' ').title(), fontsize=20)
        plt.imshow(img)
    plt.show()
# Visual sanity check: original image, ground-truth mask and predicted
# mask for the first 9 test samples.
for i in range(9): # test results with 50 Epochs
    displayimages(
        original_image = X_test[i],
        ground_truth_mask = Y_test_grey[i],
        predicted_mask = predict[i])